# Load libraries
import os
import cv2
import glob
import numpy as np
from keras.models import *
from keras.layers import *
from keras.applications import *
from keras.preprocessing.image import *
basedir = "/ext/Data/distracted_driver_detection/"
model_image_size = 299
print("-------- loading train data")
X_train = list()
y_train = list()
for i in range(10):
dir = os.path.join(basedir, "train", "c%d"%i)
image_files = glob.glob(os.path.join(dir,"*.jpg"))
print("loding {}, image count={}".format(dir, len(image_files)))
for image_file in image_files:
image = cv2.imread(image_file)
X_train.append(cv2.resize(image, (model_image_size, model_image_size)))
label = np.zeros(10, dtype=np.uint8)
label[i]=1
y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
print("-------- loading valid data")
X_valid = list()
y_valid = list()
for i in range(10):
dir = os.path.join(basedir, "valid", "c%d"%i)
image_files = glob.glob(os.path.join(dir,"*.jpg"))
print("loding {}, image count={}".format(dir, len(image_files)))
for image_file in image_files:
image = cv2.imread(image_file)
X_valid.append(cv2.resize(image, (model_image_size, model_image_size)))
label = np.zeros(10, dtype=np.uint8)
label[i]=1
y_valid.append(label)
X_valid = np.array(X_valid)
y_valid = np.array(y_valid)
print(X_train.shape)
print(y_train.shape)
print(X_valid.shape)
print(y_valid.shape)
# Transfer learning: ImageNet-pretrained InceptionV3 base, frozen, plus a
# small trainable classification head for the 10 driver-distraction classes.
base_model = InceptionV3(input_tensor=Input((model_image_size, model_image_size, 3)),
                         weights='imagenet', include_top=False)
for layer in base_model.layers:
    # Freeze the base so only the new head is trained.
    layer.trainable = False
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.25)(x)
x = Dense(10, activation='softmax')(x)
model = Model(base_model.input, x)
# BUG FIX: this is a single-label 10-class problem (softmax output, one-hot
# targets), so the loss must be categorical_crossentropy.
# binary_crossentropy here optimizes the wrong objective and reports
# misleadingly high accuracy.
model.compile(optimizer='adadelta', loss='categorical_crossentropy', metrics=['accuracy'])
print("done")
model.fit(X_train, y_train, batch_size=16, epochs=10, validation_data=(X_valid, y_valid))
# Ensure the target directory exists; model.save fails otherwise.
os.makedirs("models", exist_ok=True)
model.save("models/inceptionV3-mymodel.h5")
# Reload the saved model from disk and render its architecture as an SVG
# (confirms the h5 file round-trips correctly).
from IPython.display import SVG
from keras.models import *
from keras.utils.vis_utils import model_to_dot

model = load_model("models/inceptionV3-mymodel.h5")
print("load successed")
SVG(model_to_dot(model).create(prog='dot', format='svg'))
# Class Activation Mapping (CAM) reference: http://cnnlocalization.csail.mit.edu/
#
# The heatmap below is computed as $cam = (P - 0.5) \cdot w \cdot output$,
# where P is the predicted class probability, w the final Dense-layer
# weights, and output the last convolutional feature map.
# Print every layer's name with its index, to locate the layers used for
# the CAM visualization below.
for index, layer in enumerate(model.layers):
    print("{} - {}".format(layer.name, index))
import matplotlib.pyplot as plt
import random
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
def show_heatmap_image(model_show, weights_show):
    """Plot 16 test images overlaid with Class Activation Map heatmaps.

    Parameters
    ----------
    model_show : keras Model
        Returns (conv feature map, class predictions) for a batch of one.
    weights_show : np.ndarray
        Final Dense-layer weights used to project the feature map onto
        the 10 classes.
    """
    test_dir = os.path.join(basedir, "test", "test")
    image_files = glob.glob(os.path.join(test_dir, "*"))
    print(len(image_files))
    # Human-readable class names; loop-invariant, so hoisted out of the loop.
    status = ["safe driving", " texting - right", "phone - right",
              "texting - left", "phone - left", "operation radio",
              "drinking", "reaching behind", "hair and makeup", "talking"]
    plt.figure(figsize=(12, 14))
    for i in range(16):
        plt.subplot(4, 4, i + 1)
        # Stride through the test set so the 16 samples are spread out.
        img = cv2.imread(image_files[2000 * i + 113])
        img = cv2.resize(img, (model_image_size, model_image_size))
        x = img.copy()
        # BUG FIX: astype returns a new array (it is not in-place); the
        # original discarded the result and fed uint8 to predict.
        x = x.astype(np.float32)
        out, predictions = model_show.predict(np.expand_dims(x, axis=0))
        predictions = predictions[0]
        out = out[0]
        max_idx = np.argmax(predictions)
        prediction = predictions[max_idx]
        plt.title('c%d |%s| %.2f%%' % (max_idx, status[max_idx], prediction * 100))
        # CAM: project the conv feature map onto class space, scaled by the
        # confidence margin, then keep only the predicted class's map.
        cam = (prediction - 0.5) * np.matmul(out, weights_show)
        cam = cam[:, :, max_idx]
        # Normalize to [0, 1], then rescale so the bottom 20% maps below 0.
        cam -= cam.min()
        cam /= cam.max()
        cam -= 0.2
        cam /= 0.8
        cam = cv2.resize(cam, (model_image_size, model_image_size))
        heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
        # Suppress low-activation regions so the photo shows through there.
        heatmap[np.where(cam <= 0.2)] = 0
        out = cv2.addWeighted(img, 0.8, heatmap, 0.4, 0)
        plt.axis('off')
        # Reverse channels: OpenCV is BGR, matplotlib expects RGB.
        plt.imshow(out[:, :, ::-1])
print("done")
# Hard-coded layer indices from the listing above: 313 is presumably the
# final Dense(10) head and 310 the last conv feature map of the InceptionV3
# base -- NOTE(review): re-check these indices if the architecture changes.
dense_weights = model.layers[313].get_weights()[0]
conv_output = model.layers[310].output
# model2 emits both the conv feature map and the class probabilities.
model2 = Model(model.input, [conv_output, model.output])
print("layer_output {0}".format(conv_output))
print("weights shape {0}".format(dense_weights.shape))
show_heatmap_image(model2, dense_weights)